#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import tables
import numpy as np
import pandas as pd
import seaborn as sns
import os
## NDVI processing: locate the input CSVs and prepare output folders
path = "./Data/JapanAttributeData_NDVI"
files = os.listdir(path)
ff = files[0:]
# Prefecture name list (index g corresponds to JIS prefecture code g+1)
prelist = ["Hokkaido","Aomori", "Iwate", "Miyagi", "Akita", "Yamagata",
"Fukushima", "Ibaraki","Tochigi","Gunma","Saitama","Chiba",
"Tokyo","Kanagawa", "Niigata", "Toyama", "Ishikawa","Fukui",
"Yamanashi","Nagano","Gifu","Shizuoka","Aichi","Mie",
"Shiga","Kyoto","Osaka","Hyogo","Nara","Wakayama","Tottori",
"Shimane", "Okayama","Hiroshima","Yamaguchi","Tokushima",
"Kagawa","Ehime","Kochi", "Fukuoka","Saga","Nagasaki",
"Kumamoto","Oita","Miyazaki","Kagoshima","Okinawa"]
X = len(prelist)
# 0-based prefecture indices, used by the splitting loop below
mylist1 = list(range(0, X))
os.makedirs("./Data/Prefecture_NDVI/", exist_ok=True)
# Create one output folder per prefecture
for j in range(len(prelist)):
    os.makedirs("./Data/Prefecture_NDVI/" + prelist[j], exist_ok=True)
# Split each input CSV into per-prefecture and per-municipality files
for i in range(len(ff)):
    City = pd.read_csv(path + "/" + ff[i], header=0, engine="python")
    # Date stamp of this file, taken from the filename
    # (characters 6..12 — assumes the NDVI filename layout; confirm against the data)
    when = ff[i][6:13]
    for g in range(len(prelist)):
        # JCODE is prefecture-code * 1000 + municipal number, so selecting by
        # the thousands quotient picks exactly prefecture g+1.
        # (The original range test "> g*1000+999 and < (g+1)*1000+999" silently
        # excluded the single code (g+1)*1000+999 — fixed here.)
        CITY_1 = City[City["JCODE"] // 1000 == mylist1[g] + 1]
        ### Whole-prefecture extract
        CIT = CITY_1.reset_index(drop=True)
        CIT_prefecture = CIT.loc[:, ['X', 'Y', 'NDVI']]
        CIT_prefecture.to_csv("./Data/Prefecture_NDVI/"
                              + str(prelist[g]) + "/" + str(prelist[g])
                              + '_ZENTAI_' + str(when) + '_NDVI.csv')
        ### Per-municipality extracts
        # Municipality names, de-duplicated while keeping first-seen order
        CIT_name01 = list(dict.fromkeys(CIT["CITY_ENG"]))
        # NOTE(review): the original deliberately drops the last name here —
        # presumably a trailing placeholder/NaN entry; confirm against the data.
        CIT_name01 = CIT_name01[0:-1]
        for h in range(len(CIT_name01)):
            C001 = CIT[CIT["CITY_ENG"] == CIT_name01[h]]
            CC11 = C001.loc[:, ['X', 'Y', 'NDVI']]
            CC11.to_csv("./Data/Prefecture_NDVI/"
                        + str(prelist[g]) + "/" + str(CIT_name01[h])
                        + '_' + str(when) + '_NDVI.csv')
## LST processing: locate the input CSVs and prepare output folders
path = "./Data/JapanAttributeData_LST"
files = os.listdir(path)
ff = files[0:]
# Prefecture name list (index g corresponds to JIS prefecture code g+1)
prelist = ["Hokkaido","Aomori", "Iwate", "Miyagi", "Akita", "Yamagata",
"Fukushima", "Ibaraki","Tochigi","Gunma","Saitama","Chiba",
"Tokyo","Kanagawa", "Niigata", "Toyama", "Ishikawa","Fukui",
"Yamanashi","Nagano","Gifu","Shizuoka","Aichi","Mie",
"Shiga","Kyoto","Osaka","Hyogo","Nara","Wakayama","Tottori",
"Shimane", "Okayama","Hiroshima","Yamaguchi","Tokushima",
"Kagawa","Ehime","Kochi", "Fukuoka","Saga","Nagasaki",
"Kumamoto","Oita","Miyazaki","Kagoshima","Okinawa"]
X = len(prelist)
# 0-based prefecture indices, used by the splitting loop below
mylist1 = list(range(0, X))
os.makedirs("./Data/Prefecture_LST/", exist_ok=True)
# Create one output folder per prefecture
for j in range(len(prelist)):
    os.makedirs("./Data/Prefecture_LST/" + prelist[j], exist_ok=True)
# Split each input CSV into per-prefecture and per-municipality files
for i in range(len(ff)):
    City = pd.read_csv(path + "/" + ff[i], header=0, engine="python")
    # Date stamp of this file, taken from the filename
    # (characters 5..11 — the LST filenames have a shorter prefix than the
    # NDVI ones; confirm against the data)
    when = ff[i][5:12]
    for g in range(len(prelist)):
        # JCODE is prefecture-code * 1000 + municipal number, so selecting by
        # the thousands quotient picks exactly prefecture g+1.
        # (The original range test "> g*1000+999 and < (g+1)*1000+999" silently
        # excluded the single code (g+1)*1000+999 — fixed here.)
        CITY_1 = City[City["JCODE"] // 1000 == mylist1[g] + 1]
        ### Whole-prefecture extract
        CIT = CITY_1.reset_index(drop=True)
        CIT_prefecture = CIT.loc[:, ['X', 'Y', 'LST']]
        CIT_prefecture.to_csv("./Data/Prefecture_LST/"
                              + str(prelist[g]) + "/" + str(prelist[g])
                              + '_ZENTAI_' + str(when) + '_LST.csv')
        ### Per-municipality extracts
        # Municipality names, de-duplicated while keeping first-seen order
        CIT_name01 = list(dict.fromkeys(CIT["CITY_ENG"]))
        # NOTE(review): the original deliberately drops the last name here —
        # presumably a trailing placeholder/NaN entry; confirm against the data.
        CIT_name01 = CIT_name01[0:-1]
        for h in range(len(CIT_name01)):
            C001 = CIT[CIT["CITY_ENG"] == CIT_name01[h]]
            CC11 = C001.loc[:, ['X', 'Y', 'LST']]
            CC11.to_csv("./Data/Prefecture_LST/" + str(prelist[g])
                        + "/" + str(CIT_name01[h]) + '_' + str(when) + '_LST.csv')
# Related articles (pasted index text — commented out so the script stays valid Python):
# [GRASSのインストール、標高データを用いた地滑り危険度マップの作成]
# [植生指数 (NDVI) の計算、表示]
# [標高データ (SRTM)の表示、植生指数 (NDVI) の3次元表示]
# [反射率、輝度温度、標高データを用いた土地被覆分類]
# [QGIS, Rを用いた公示地価データの空間統計分析:空間的自己回帰モデル]
# [QGIS, Rを用いた公示地価データの空間統計分析:静的な時空間モデリング]
# [Rを用いた衛星データ(LST, NDVI)の空間統計分析]
# [Pythonを用いた空間統計分析のための衛星データ(LST, NDVI)の処理1(市町村別データの生成)]
# [Pythonを用いた空間統計分析のための衛星データ(LST, NDVI)の処理2(人口データの重みを加味した市町村別データの生成)]